Indicator grades

For each indicator, we set thresholds for “Poor”, “Fair”, and “Good” performance, set such that “Fair” corresponds to a wide range within a normal distribution of outcomes. Where available, we use data from the AWWA Benchmarking database interquartile ranges to construct these thresholds. Where not available, we use best engineering practice from the literature or other surveys. Xylem should consider purchasing this dataset annually to help construct the thresholds.

Pillar 1

Tract-level

HBI/PPI Affordability (real rate)
# Grade the Household Burden Index (HBI, % of income spent on water/sewer)
# against the affordability thresholds: <7% Good, 7-10% Fair, >=10% Poor.
#
# @param x Numeric vector of HBI values (percent).
# @return Character vector of grade labels; NA where x is NA.
p1_grade_hbi <- function(x){
  # case_when() returns NA for unmatched/NA inputs on its own, so the
  # former `grade <- NA` placeholder was dead code and is removed.
  dplyr::case_when(
    x < 7 ~ "1. Good - <7%",
    x >= 7 & x < 10 ~ "2. Fair - 7-10%",
    x >= 10 ~ "3. Poor - >10%"
  )
}


# Grade the Poverty Prevalence Indicator (PPI, % of households below
# 200% of the Federal Poverty Level): <20 Good, 20-35 Fair, >=35 Poor.
#
# @param x Numeric vector of PPI values (percent).
# @return Character vector of grade labels; NA where x is NA.
p1_grade_ppi <- function(x){
  # case_when() already yields NA for unmatched/NA inputs; the previous
  # `grade <- NA` assignment was immediately overwritten and is removed.
  dplyr::case_when(
    x < 20 ~ "1. Good",
    x >= 20 & x < 35 ~ "2. Fair",
    x >= 35 ~ "3. Poor"
  )
}


# Collapse the five combined HBI/PPI burden categories into the 3-level
# Good/Fair/Poor grade used throughout the report.
#
# @param x Character vector of burden categories ("Low Burden",
#   "Moderate-Low Burden", "Moderate-High Burden", "High Burden",
#   "Very High Burden").
# @return Character vector of grade labels; NA for NA or unrecognized input.
p1_grade_hbi_ppi <- function(x){
  # Dead `grade <- NA` placeholder removed; case_when() handles the
  # unmatched/NA case itself.
  dplyr::case_when(
    x == "Low Burden" ~ "1. Good",
    x == "Moderate-Low Burden" ~ "2. Fair",
    x == "Moderate-High Burden" ~ "2. Fair",
    x == "High Burden" ~ "3. Poor",
    x == "Very High Burden" ~ "3. Poor"
  )
}

## Apply the Pillar 1 grading helpers to the overall and tract-level data.
P1_overall$Grade_PPI <- p1_grade_ppi(P1_overall$PPI)
P1_tract$Grade_PPI <- p1_grade_ppi(P1_tract$PPI)

P1_overall$Grade_HBI <- p1_grade_hbi(P1_overall$HBI_size_avg)
P1_tract$Grade_HBI <- p1_grade_hbi(P1_tract$HBI_size_avg)

# NOTE(review): p1_grade_hbi_ppi() expects burden-category strings
# ("Low Burden" ... "Very High Burden"), but HBI_size_avg is numeric, so
# Grade_AFF will be entirely NA here. The tract-level calls below pass
# aff_grade_* columns instead — confirm whether an overall aff_grade
# column was intended on this line.
P1_overall$Grade_AFF <- p1_grade_hbi_ppi(P1_overall$HBI_size_avg)
# One affordability grade per household-size scenario (avg and sizes 1-7).
P1_tract$Grade_AFF_avg <- p1_grade_hbi_ppi(P1_tract$aff_grade_avg)
P1_tract$Grade_AFF_1 <- p1_grade_hbi_ppi(P1_tract$aff_grade_1)
P1_tract$Grade_AFF_2 <- p1_grade_hbi_ppi(P1_tract$aff_grade_2)
P1_tract$Grade_AFF_3 <- p1_grade_hbi_ppi(P1_tract$aff_grade_3)
P1_tract$Grade_AFF_4 <- p1_grade_hbi_ppi(P1_tract$aff_grade_4)
P1_tract$Grade_AFF_5 <- p1_grade_hbi_ppi(P1_tract$aff_grade_5)
P1_tract$Grade_AFF_6 <- p1_grade_hbi_ppi(P1_tract$aff_grade_6)
P1_tract$Grade_AFF_7 <- p1_grade_hbi_ppi(P1_tract$aff_grade_7)

Both overall and across all census tracts, the affordability of water and sewer in Naperville can be characterized as “Good”. Naperville is the wealthiest community in the Midwest, so this might be expected. The median household income of Naperville is ~ $125,000, and the upper limit of the lowest quintile is ~$68,000 with a population with income <200% of the FPL of 5.2%. Compare to ~ $62,000, ~$22,000 and 29.5% for Chicago, IL. For the purposes of demonstrating the descriptive capabilities of the indicator, we construct an alternative HBI/PPI indicator by tripling both the PPI and HBI within Naperville census tracts across the board.

HBI/PPI Demonstrative Alternative
## Demonstrative alternative scenario: triple both HBI and PPI across the
## board (per the narrative above) to show how the indicator responds.
P1_overall$HBI_alt <- 3 * P1_overall$HBI_size_avg
P1_overall$PPI_alt <- 3 * P1_overall$PPI

P1_tract$PPI_alt <- 3 * P1_tract$PPI
P1_tract$HBI_alt <- 3 * P1_tract$HBI_size_avg

# Re-grade the tripled values with the same thresholds as the real data.
P1_overall$Grade_PPI_alt <- p1_grade_ppi(P1_overall$PPI_alt)
P1_tract$Grade_PPI_alt <- p1_grade_ppi(P1_tract$PPI_alt)

P1_overall$Grade_HBI_alt <- p1_grade_hbi(P1_overall$HBI_alt)
P1_tract$Grade_HBI_alt <- p1_grade_hbi(P1_tract$HBI_alt)

# gradeAffordability() is defined elsewhere in this project; presumably it
# maps an (HBI, PPI) pair to one of the five burden-category strings that
# p1_grade_hbi_ppi() consumes — confirm against its definition.
P1_overall$Grade_AFF_alt <- p1_grade_hbi_ppi(gradeAffordability(P1_overall$HBI_alt,P1_overall$PPI_alt))
P1_tract$Grade_AFF_alt <- p1_grade_hbi_ppi(gradeAffordability(P1_tract$HBI_alt,P1_tract$PPI_alt))

Below on the left we visualize the HBI, PPI, and overall grade using real data, and on the right with the demonstrated alternative data (applying Chicago’s income and poverty distributions to Naperville). A similar layout could be used for a user to compare the affordability metrics associated with an existing rate structure with a user-inputted rate structure.

## Build six synchronized choropleths: real vs demo PPI, real vs demo HBI,
## and real vs demo overall affordability grade. `b` (municipal boundary)
## and sync() come from earlier in the document / attached packages.
m <- P1_tract %>% select(GEOID,NAME,PPI,Grade_PPI,PPI_alt,Grade_PPI_alt,
                         HBI_size_avg,Grade_HBI,HBI_alt,Grade_HBI_alt,
                         Grade_AFF_avg, Grade_AFF_alt)

m_ppi <- mapview::mapview(m,zcol="PPI",layer.name="PPI") +  mapview::mapview(b,alpha.regions=0,col.regions="red",stroke=TRUE,lwd=3,color="red",layer.name="Municipal Boundary")

m_ppi_alt <- mapview::mapview(m,zcol="PPI_alt",layer.name="Demo PPI") +  mapview::mapview(b,alpha.regions=0,col.regions="red",stroke=TRUE,lwd=3,color="red",layer.name="Municipal Boundary")

m_hbi <- mapview::mapview(m,zcol="HBI_size_avg",layer.name="HBI") +  mapview::mapview(b,alpha.regions=0,col.regions="red",stroke=TRUE,lwd=3,color="red",layer.name="Municipal Boundary")

m_hbi_alt <- mapview::mapview(m,zcol="HBI_alt",layer.name="Demo HBI") +  mapview::mapview(b,alpha.regions=0,col.regions="red",stroke=TRUE,lwd=3,color="red",layer.name="Municipal Boundary")

# Grade maps use a categorical palette (3 grade levels -> Dark2).
m_aff <- mapview::mapview(m,zcol="Grade_AFF_avg",layer.name="Affordability Grade",col.regions=brewer.pal(3,"Dark2")) +  mapview::mapview(b,alpha.regions=0,col.regions="red",stroke=TRUE,lwd=3,color="red",layer.name="Municipal Boundary")

m_aff_alt <- mapview::mapview(m,zcol="Grade_AFF_alt",layer.name="Demo Affordability Grade", col.regions=brewer.pal(3,"Dark2")) +  mapview::mapview(b,alpha.regions=0,col.regions="red",stroke=TRUE,lwd=3,color="red",layer.name="Municipal Boundary")

map1 <- sync(m_ppi,m_ppi_alt,m_hbi,m_hbi_alt,m_aff,m_aff_alt)

map1
Delinquency Rate

To set Delinquency grade thresholds, we take data from the AWWA Benchmarking database and set them to the interquartile range:

  • Good: < 2.5%
  • Fair: 2.5% - 14%
  • Poor: > 14%
## Simulate a tract-level delinquency rate and grade it against the AWWA
## interquartile thresholds: <2.5% Good, 2.5-14% Fair, >14% Poor.
# NOTE(review): the 3.1 multiplier on the cutoff percentage is an assumed
# relationship used to simulate delinquency — confirm or parameterize.
P1_tract$Delinquency_rate <- P1_tract$Cutoff_Perc*3.1

P1_tract <-
  P1_tract %>%
  mutate(Grade_delinquency = case_when(
    Delinquency_rate < 2.5 ~ "1. Good - <2.5%",
    Delinquency_rate >= 2.5 & Delinquency_rate <= 14 ~ "2. Fair - 2.5-14%",
    Delinquency_rate > 14 ~ "3. Poor - >14%"
  ))

map2 <- select(P1_tract,NAME,GEOID,HBI_size_avg,PPI,aff_grade_avg, Delinquency_rate, Grade_delinquency)
# brewer.pal() requires n >= 3; asking for 2 merely warned and returned the
# 3-colour palette anyway, so request 3 explicitly to silence the warning.
mapview::mapview(map2,zcol="Grade_delinquency",
                 layer.name="Delinquency (%) Grade",
                 col.regions=brewer.pal(3,"Dark2"))
Cutoff Rate

To set cutoff grade thresholds, we need to examine a real-world distribution of cutoff rates, which are not included in the AWWA Benchmarking data. CA collects cutoff rate information for the Electronic Annual Report process. Below, we see that at least in CA, 25% of utilities had no shutoffs at all, while another 20% of utilities had shutoff rates between 0% and 1%. We might wish to set “0” or “1%” as the “Good” category. There are some discontinuities evident at around a shutoff rate of 10% (7% of utilities were above that) and 15% (4% of utilities above that), which we might take as our boundary between “Fair” and “Poor” cutoff performance. This would be better tuned with more nationally representative data, but as a first cut:

  • Good: <1%
  • Fair: 1-10%
  • Poor: >10%
#nothing
# Load the California Electronic Annual Report shutoff data (provides `D`).
# NOTE(review): the file has an .rds extension but is read with load(),
# which only works for files written by save(); if it was produced with
# saveRDS(), this should be `D <- readRDS("../data/cal-data.rds")` — confirm.
load("../data/cal-data.rds")
# ECDF of utility cutoff rates, with the proposed Good/Fair/Poor bands
# shaded green/orange/red and the 1% and 10% thresholds marked.
ggplot(D, aes(shutoffs_perc*100)) +
  geom_rect(mapping=aes(xmin=0,ymin=0,xmax=1,ymax=1), fill="green", alpha=0.5) +
  geom_rect(mapping=aes(xmin=1,ymin=0,xmax=10,ymax=1), fill="orange", alpha=0.5) +
  geom_rect(mapping=aes(xmin=10,ymin=0,xmax=35,ymax=1), fill="red", alpha=0.5) +
  stat_ecdf(geom="step") +
  geom_vline(xintercept=1) +
  geom_vline(xintercept=10) +
  ylab("Cumulative proportion of utilities") +
  xlab("Cutoff rate (%)")
## Warning: Removed 861 rows containing non-finite values (stat_ecdf).

Recall that we simulated shutoff rates, assuming this was related to HBI and PPI, which might be a plausible story. Below, we apply our shutoff grades.

## Grade the simulated tract shutoff (cutoff) rates: <1% Good, 1-10% Fair,
## >10% Poor (thresholds motivated by the CA distribution plotted above).
P1_tract <-
  P1_tract %>%
  mutate(Grade_cutoff = case_when(
    Cutoff_Perc < 1 ~ "1. Good - <1%",
    Cutoff_Perc >= 1 & Cutoff_Perc < 10 ~ "2. Fair - 1-10%",
    # Fixed label typo: was "3. Poo - >10%r".
    Cutoff_Perc >= 10 ~ "3. Poor - >10%"
  ))

map2 <- select(P1_tract,NAME,GEOID,HBI_size_avg,PPI,aff_grade_avg, Cutoff_Perc, Grade_cutoff)
# brewer.pal() minimum n is 3; requesting 2 only produced a warning and the
# 3-colour palette, so request 3 directly.
mapview::mapview(map2,zcol="Grade_cutoff",
                 layer.name="Cutoff (%) Grade",
                 col.regions=brewer.pal(3,"Dark2"))
Customer Assistance Program participation

Participation should be measured as a proportion of “eligible” households. Here we set the eligible population as the proportion of families earning less than 200% of the FPL, although this in principle could be set on a utility-by-utility or utility-by-state basis with the appropriate eligibility criteria and estimates of this population. Based on previous participation reports from the CA Low-Income Rate Assistance Program, as well as the national LIHEAP program, participation among this eligible population is generally between 0% and 40%, with an interquartile range of 11-22%. We can revise this to more “round” numbers of 10% and 25%.

  • Good: >25%
  • Fair: 10-25%
  • Poor: <10%

We simulated this CAP participation rate, making assumptions related to certain demographic groups having lower participation rates than others, although most empirical evidence in the literature suggests this not to be a significant factor over the general level of outreach and application barriers to such programs. We apply the thresholds to create the 3-level grades below.

## CAP participation as a % of eligible (<200% FPL) households, then graded:
## >=25% Good, 10-25% Fair, <10% Poor.
# NOTE(review): the (200/3) scaling converts the simulated CAP_Perc into a
# participation rate relative to the PPI-based eligible population — confirm
# this derivation against the simulation assumptions.
P1_tract$CAP_Percent <- (200/3)*P1_tract$CAP_Perc/P1_tract$PPI

P1_tract <-
  P1_tract %>% 
  mutate(Grade_CAP = case_when(
    CAP_Percent < 10 ~ "3. Poor - <10%",
    CAP_Percent >= 10 & CAP_Percent < 25 ~ "2. Fair - 10-25%",
    CAP_Percent >= 25 ~ "1. Good - >25%"
  ))

map3 <- select(P1_tract,NAME,GEOID,HBI_size_avg,PPI,aff_grade_avg, Cutoff_Perc, Grade_cutoff, CAP_Perc, Grade_CAP)
# Fixed displayed-label typo: "Garde" -> "Grade".
mapview::mapview(map3,zcol="Grade_CAP",layer.name="Grade of CAP Participation/PPI (%)", col.regions=brewer.pal(3,"Dark2"))
Appliance efficiency incentive participation rate

Annual participation in whole-home retrofit programs generally tops out at 3%, and individual appliance or irrigation/ field turf retrofits can vary between 0-20% or more. A first cut of thresholds:

  • Good: >5%
  • Fair: 1-5%
  • Poor: <1%
## Simulate appliance-efficiency incentive participation (normal, mean 2.5%,
## sd 1.5%) and grade: >=5% Good, 1-5% Fair, <1% Poor.
set.seed(324324)
P1_tract$Incentive_rate <- rnorm(length(P1_tract$GEOID),2.5,1.5)
P1_tract <-
  P1_tract %>%
  mutate(Grade_incentive = case_when(
    Incentive_rate < 1 ~ "3. Poor - <1%",
    # Removed an accidental trailing space from the Fair label so the three
    # labels are formatted consistently.
    Incentive_rate >=1 & Incentive_rate < 5 ~ "2. Fair - 1-5%",
    Incentive_rate >= 5 ~ "1. Good - >5%"
  ))

m4 <- select(P1_tract, NAME,GEOID,Incentive_rate,Grade_incentive)
mapview::mapview(m4,zcol="Grade_incentive",layer.name="Incentive Participation grade",col.regions=brewer.pal(3,"Dark2"))
“Technical (service quality)” Complaints received/ 1,000 connections

The interquartile range for the 2018 version of the AWWA Benchmarking data for combined breaks and leaks can be used to set the ranges:

  • Good: < 1.5
  • Fair: 1.5 - 6.5
  • Poor: > 6.5
## Simulate technical (service-quality) complaints per 1,000 connections and
## grade them against the AWWA interquartile thresholds: <1.5 Good,
## 1.5-6.5 Fair, >6.5 Poor.
set.seed(3242324)
P1_tract$complaint_per_conn <- rnorm(length(P1_tract$GEOID),
                                     3.4,2.5)

# A complaint rate cannot be negative; floor the simulated draws at zero.
P1_tract$complaint_per_conn <- pmax(P1_tract$complaint_per_conn, 0)

P1_tract <-
  P1_tract %>%
  mutate(
    Grade_complaint_per_conn = case_when(
      complaint_per_conn < 1.5 ~ "1. Good - <1.5 ",
      complaint_per_conn >= 1.5 & complaint_per_conn < 6.5 ~ "2. Fair - 1.5-6.5",
      complaint_per_conn >= 6.5 ~ "3. Poor - >6.5"
    )
  )

m5 <- select(P1_tract, NAME,GEOID,complaint_per_conn,Grade_complaint_per_conn)
mapview::mapview(m5,zcol="Grade_complaint_per_conn",layer.name="Complaint grade",col.regions=brewer.pal(3,"Dark2"))
% Complaints resolved

The interquartile range for the 2018 version of the AWWA Benchmarking data for combined breaks and leaks can be used to set the ranges:

  • NA: No complaints
  • Good: > 97%
  • Fair: 91% - 97%
  • Poor: < 91%
## Simulate the % of complaints resolved and grade: >97% Good, 91-97% Fair,
## <91% Poor. Tracts with zero complaints get NA (resolution is undefined).
set.seed(3242324)
P1_tract$complaint_addressed_percent <- rnorm(length(P1_tract$GEOID),
                                     95,3)

# No complaints -> no resolution rate to report.
P1_tract$complaint_addressed_percent[which(P1_tract$complaint_per_conn==0)] <- NA


P1_tract <- 
  P1_tract %>%
  mutate(Grade_complaint_addressed_percent =
    case_when( 
      complaint_addressed_percent > 97 ~ "1. Good - >97%",
      # Fixed: the upper bound previously compared complaint_per_conn
      # (complaints per 1,000 connections) instead of the resolution
      # percentage itself.
      complaint_addressed_percent >= 91 & complaint_addressed_percent <= 97 ~ "2. Fair - 91-97%",
      complaint_addressed_percent < 91 ~ "3. Poor - <91%"
    )
  )

m6 <- select(P1_tract, NAME,GEOID,complaint_addressed_percent,Grade_complaint_addressed_percent)
mapview::mapview(m6,zcol="Grade_complaint_addressed_percent",layer.name="Complaint grade",col.regions=brewer.pal(3,"Dark2"))

Utility-level only

Disruption frequency index (Total interruptions/ customers)

Thresholds based on AWWA Benchmarking data:

  • Good: < 1
  • Fair: 1 - 4
  • Poor: > 4
Percent accounts with at least 1 boil water advisory
  • Good: 0%
  • Fair: 0-1%
  • Poor: >1%

For Naperville, this is 0 (Good)

SDWA Violations

It can be difficult to normalize water quality regulatory violations across utilities by contaminant type or population exposed. Instead we can adopt the EPA’s framework. EPA identifies utilities as “serious violators” if they have a repeated pattern of water quality violations without addressing them.

  • Good: 0 violations
  • Fair: >0 violations, but EPA not labeled as “Serious Violator” for that year
  • Poor: EPA has labeled as “Serious violator” for that year

Serious violator status and health-based violation count can be procured from EPA SDWIS

The majority of utilities in any given year have 0 health-based violations, and will be “good”.

Pillar 2

Tract Level

% of staff in tract/ % of service households in tract
  • Good: >0.5
  • Fair: 0.25 - 0.5
  • Poor: <0.25
## Staff representation: share of utility staff living in the tract divided
## by the tract's share of service households. Graded >0.5 Good,
## 0.25-0.5 Fair, <0.25 Poor.
# NOTE(review): the /3 and /300 constants look like simulation scaling
# (staff_count inflated 3x; ~300 total staff assumed) — confirm and
# consider naming them. Also, exactly 0.25 grades Poor here while the
# documented Fair band is "0.25 - 0.5" — confirm the intended boundary.
P2_tract <- 
  P2_tract %>% 
  mutate(
    staff_count2 = staff_count/3,
    staff_share_ratio = (staff_count2/300)/(hh_type_total_countE/sum(hh_type_total_countE)),
    Grade_staff_share_ratio = case_when(
      staff_share_ratio > 0.5 ~ "1. Good - >0.5",
      staff_share_ratio > 0.25 & staff_share_ratio <= 0.5 ~ "2. Fair - 0.25-0.5",
      staff_share_ratio <= 0.25 ~ "3. Poor - <0.25"
    )
  )

# Re-attach tract geometries so the result can be mapped.
tr.w2 <- select(tr.w,GEOID)
P2_tract <- left_join(tr.w2,P2_tract,by="GEOID")
mapview(P2_tract, zcol ="Grade_staff_share_ratio", layer.name = "% of staff in tract/ </break>% of service households in tract",col.regions=brewer.pal(3,"Dark2"))

Utility-level

% of staff in the Service area
  • Good: >0.5
  • Fair: 0.25 - 0.5
  • Poor: <0.25

The rest of the indicators are binary 0/1:

  • Recruitment diversity requirement
  • Local procurement requirement
  • Training and education programs offered within area

See Lens_Pillars_Overall.xlsx for more detail.

Pillar 3

Tract Level

Total Community Meetings

  • Good: > 1
  • Fair: 1
  • Poor: 0
## Simulate community-meeting counts per tract and grade: >1 Good, 1 Fair,
## 0 Poor.
set.seed(432543)
# NOTE(review): rbinom(n, size = 1, 0.3) can only return 0 or 1, so the
# "1. Good - >1" branch below is unreachable — was a Poisson draw or a
# larger `size` intended?
P3_tract$meeting_count<- rbinom(length(P3_tract$GEOID),1,0.3)

P3_tract <- 
  P3_tract %>% 
  mutate(
  Grade_meetings = case_when(
      meeting_count > 1 ~ "1. Good - >1",
      meeting_count == 1 ~ "2. Fair - 1",
      meeting_count == 0 ~ "3. Poor - 0"
    )
  )
Main breaks AND leaks/ 100 mi pipe

Based on this report, main break rates can vary considerably by utility network length, pipe materials, and corrosiveness of soil, although the highest break rates by a large margin occur in the combination of Cast Iron pipe with highly corrosive soils, with break rates in Cast Iron pipe in small utilities exceeding 60/ 100 mile-year. The average break rate across North America is 25-30 breaks/ 100-mi-yr. The AWWA “Distribution System Optimization Program” sets a target of <= 15 breaks/ 100-mi-yr. However, given how much the variation may be driven by very small utilities with high proportions of Cast Iron mains length, we also refer to the AWWA Benchmarking data. The interquartile range for the 2018 version for combined breaks and leaks can be used to set the ranges:

  • Good: < 5
  • Fair: 5 - 20
  • Poor: > 20
## Main breaks and leaks per 100 miles of pipe, graded against the AWWA
## interquartile thresholds: <5 Good, 5-20 Fair, >20 Poor.
P3_tract$breaks_100mi <- P3_tract$main_breaks/P3_tract$mains_100mile

P3_tract <- 
  P3_tract %>% 
  mutate(
  Grade_breaks = case_when(
      breaks_100mi < 5 ~ "1. Good - <5",
      # Fixed: the upper bound previously tested meeting_count (a copy/paste
      # slip from the meetings block) instead of breaks_100mi.
      breaks_100mi >= 5 & breaks_100mi <= 20 ~ "2. Fair - 5-20",
      breaks_100mi > 20 ~ "3. Poor - >20"
    )
  )
Sewer overflows/ 100 mi pipe

This is based on the 2005 Benchmarking result.

  • Good: < 2
  • Fair: 2 - 10
  • Poor: > 10
## Sewer overflows per 100 miles of pipe: <2 Good, 2-10 Fair, >10 Poor.
# NOTE(review): SSO_100mi is simulated as a copy of breaks_100mi — replace
# with real overflow data when available.
P3_tract$SSO_100mi <- P3_tract$breaks_100mi  

P3_tract <- 
  P3_tract %>% 
  mutate(
  Grade_SSO = case_when(
      SSO_100mi < 2 ~ "1. Good - <2",
      # Fixed: the upper bound previously tested meeting_count (copy/paste
      # slip) instead of SSO_100mi.
      SSO_100mi >= 2 & SSO_100mi <= 10 ~ "2. Fair - 2-10",
      SSO_100mi > 10 ~ "3. Poor - >10"
    )
  )
Percentage service Lines lead
  • Good: 0
  • Fair: 0 - 5
  • Poor: > 5
## Grade the share of service lines that are lead: 0 Good, (0, 5%] Fair,
## >5% Poor. prop_lead_lines is a proportion (0-1); the labels quote
## percentages.
P3_tract <- 
  P3_tract %>%
  mutate(Grade_prop_lead_lines = case_when(
    prop_lead_lines > 0.05 ~ "3. Poor - >5%",
    prop_lead_lines > 0 ~ "2. Fair - 0-5%",
    prop_lead_lines == 0 ~ "1. Good - 0%"
  ))
Percentage lead lines replaced

Points based on Lead and Copper Rule requirements, which say that if lead action levels exceed 0.015mg/l after implementing corrosion control, 7% of lead lines should be replaced annually (p.19).

  • NA: No lead lines or system meets LCR Action levels for lead in accordance with LCR sampling plan.
  • Good: >= 7
  • Fair: 0 - 7
  • Poor: 0
## Simulate the annual lead-line replacement rate and grade it against the
## Lead and Copper Rule benchmark: >=7%/yr Good, 0-7% Fair, 0 (or negative
## net replacement) Poor. NA where the tract has no lead lines.
set.seed(485782343)
P3_tract$prop_lead_lines_replaced <-
  runif(length(P3_tract$GEOID), -0.1, 0.15)
# No lead lines -> replacement rate is not applicable.
P3_tract$prop_lead_lines_replaced[which(P3_tract$prop_lead_lines == 0)] <-
  NA

P3_tract <-
  P3_tract %>%
  mutate(
    Grade_prop_lead_lines_replaced = case_when(
      is.na(prop_lead_lines_replaced) ~ NA_character_,
      prop_lead_lines_replaced <= 0 ~ "3. Poor - 0%",
      prop_lead_lines_replaced > 0 &
        prop_lead_lines_replaced < 0.07 ~ "2. Fair - 0-7%",
      # The LCR requires replacing >= 7% of lead lines annually, so exactly
      # 7% now counts as Good (previously graded Fair via a strict `>`).
      prop_lead_lines_replaced >= 0.07 ~ "1. Good - >7%"
    )
  )
Percent Network renewed (by asset value)

Based on the 2018 AWWA Benchmarking data, setting thresholds according to the interquartile range:

  • Good: > 2
  • Fair: 0.5 - 2
  • Poor: < 0.5
## Percent of network renewed annually (by asset value): >2% Good,
## 0.5-2% Fair, <0.5% Poor.
# NOTE(review): the 1.5/10 rescaling adjusts the simulated values into a
# realistic renewal range — confirm against the simulation assumptions.
P3_tract$network_renewal_percent <- 1.5*P3_tract$network_renewal_percent/10

P3_tract <- 
  P3_tract %>%
  mutate(Grade_network_renewal_percent = case_when(
    network_renewal_percent < 0.5 ~ "3. Poor - <0.5%",
    network_renewal_percent >= 0.5 & network_renewal_percent <= 2 ~ "2. Fair - 0.5-2%",
    # Fixed label formatting: added the " - " separator used by every other
    # grade label (was "1. Good >2%").
    network_renewal_percent > 2 ~ "1. Good - >2%"
  ))
Planned Linear Maintenance (hrs/ 100 mile)
  • Good: > 1800
  • Fair: 650 - 1800
  • Poor: < 650
## Planned linear maintenance (hours per 100 miles): >1800 Good,
## 650-1800 Fair, <650 Poor.
# Seed the simulation for reproducibility — every other simulated indicator
# in this document calls set.seed() before drawing; this one did not, so
# its values changed on every knit.
set.seed(937400)
P3_tract$Maintenance_100mi <- rnorm(length(P3_tract$GEOID),937,400)

P3_tract <- 
  P3_tract %>%
  mutate(Grade_maintenance = case_when(
    Maintenance_100mi < 650 ~ "3. Poor - <650",
    Maintenance_100mi >= 650 & Maintenance_100mi <= 1800 ~ "2. Fair - 650-1800",
    Maintenance_100mi > 1800 ~ "1. Good - >1800"
  ))

Overall level

Utility Resilience Index, Rescaled without poverty and income indicators

Can be filled out at the EPA VSAT tool

  • Good: > 0.75
  • Fair: 0.5 - 0.75
  • Poor: < 0.5
Infrastructure Leakage Index

The ILI is a performance indicator of how well a distribution system is managed for the control of real losses. ILI = Total Annual Real Losses/Unavoidable Annual Real Losses. It can be calculated with the AWWA Water Loss Audit Worksheet. It has a minimum realistic value of 1, and theoretical maximum ~14.

See [here](https://www.twdb.texas.gov/conservation/municipal/waterloss/doc/InfrastructureLeakageIndex.pdf) for a justification of the thresholds set below.

  • Good: < 3
  • Fair: 3-8
  • Poor: > 8

Create Final Datasets

## Assemble the combined tract dataset: P1_tract keeps its geometry;
## P2/P3 attributes are joined on GEOID (P2's geometry is dropped first to
## avoid a geometry-column clash).
d <- left_join(P1_tract,st_drop_geometry(P2_tract), by="GEOID")
d <- left_join(d,P3_tract,by="GEOID")

## Make Pillar 1 Datasets
# Copy the working columns into stable, pillar-prefixed export names.
d <- d %>%
  mutate(
    P1_HBI = HBI_size_avg,
    P1_PPI = PPI,
    P1_Delinquency_rate = Delinquency_rate,
    P1_Cutoff_rate = Cutoff_Perc,
    P1_CAP = CAP_Percent,
    P1_Efficiency_Program = Incentive_rate,
    P1_Complaints_per_1000conn = complaint_per_conn,
    P1_Complaint_Resolution = complaint_addressed_percent,
    P1_Grade_HBI = Grade_HBI,
    P1_Grade_Affordability = Grade_AFF_avg,
    P1_Grade_Affordability_alt = Grade_AFF_alt,
    P1_Grade_Delinquency = Grade_delinquency,
    P1_Grade_Cutoff = Grade_cutoff,
    P1_Grade_CAP = Grade_CAP,
    P1_Grade_Efficiency = Grade_incentive,
    P1_Grade_Complaints_per1000conn = Grade_complaint_per_conn,
    P1_Grade_Complaint_Resolution = Grade_complaint_addressed_percent
  )

# Pillar 1 export: GEOID plus the contiguous run of P1_* columns.
p1 <- select(d, GEOID, P1_HBI:P1_Grade_Complaint_Resolution)

## Make Pillar 2 Dataset 

# Pillar 2 currently exports a single indicator (staff share ratio) and
# its grade, renamed with the P2_ prefix for consistency.
d <- d %>% 
  mutate(
    P2_Staff_Share_Ratio = staff_share_ratio,
    P2_Grade_Staff_Share = Grade_staff_share_ratio
  )

p2 <- select(d, GEOID, P2_Staff_Share_Ratio,P2_Grade_Staff_Share)


## Make Pillar 3 Dataset
# Copy the working columns into stable, pillar-prefixed export names.
d <- d %>% mutate(
  P3_Meetings = meeting_count,
  P3_Grade_Meetings = Grade_meetings,
  P3_Breaks_100mi = breaks_100mi,
  P3_SSO_100mi = SSO_100mi,
  P3_Lead_Lines_Percent = 100*prop_lead_lines,
  # NOTE(review): stored as a proportion (0-1) even though the name says
  # "Percent" — confirm whether this should be 100*prop_lead_lines_replaced
  # to match P3_Lead_Lines_Percent above.
  P3_Lead_Lines_Replaced_Percent = prop_lead_lines_replaced,
  P3_Network_Renewal = network_renewal_percent,
  P3_Maintenance_Hrs_100mi = Maintenance_100mi,
  P3_Grade_Breaks = Grade_breaks,
  P3_Grade_SSO = Grade_SSO,
  P3_Grade_Lead = Grade_prop_lead_lines,
  # Fixed: previously copied the numeric prop_lead_lines_replaced value
  # instead of its grade column. Also removed a duplicate
  # P3_Grade_Meetings assignment that appeared a second time here.
  P3_Grade_Lead_Replace = Grade_prop_lead_lines_replaced,
  P3_Grade_Network_Renewal = Grade_network_renewal_percent,
  P3_Grade_Maintenance = Grade_maintenance
)


# Pillar 3 export and the all-pillars export (P1 through P3 columns).
p3 <- select(d,GEOID, P3_Meetings:P3_Grade_Maintenance)


tracts <- select(d,GEOID, P1_HBI:P3_Grade_Maintenance)


## Write each pillar (and the combined dataset) as GeoJSON + CSV.
# NOTE(review): st_write() errors if the target file already exists —
# consider delete_dsn = TRUE for repeat knits. write.csv() also emits a
# row-number column; row.names = FALSE would suppress it.
st_write(p1,"../out/Tracts_Pillar1.geojson")
## Writing layer `Tracts_Pillar1' to data source `../out/Tracts_Pillar1.geojson' using driver `GeoJSON'
## Writing 45 features with 18 fields and geometry type Multi Polygon.
write.csv(st_drop_geometry(p1),"../out/Tracts_Pillar1.csv")

st_write(p2,"../out/Tracts_Pillar2.geojson")
## Writing layer `Tracts_Pillar2' to data source `../out/Tracts_Pillar2.geojson' using driver `GeoJSON'
## Writing 45 features with 3 fields and geometry type Multi Polygon.
write.csv(st_drop_geometry(p2),"../out/Tracts_Pillar2.csv")

st_write(p3,"../out/Tracts_Pillar3.geojson")
## Writing layer `Tracts_Pillar3' to data source `../out/Tracts_Pillar3.geojson' using driver `GeoJSON'
## Writing 45 features with 15 fields and geometry type Multi Polygon.
write.csv(st_drop_geometry(p3),"../out/Tracts_Pillar3.csv")

st_write(tracts,"../out/Tracts_Pillars_All.geojson")
## Writing layer `Tracts_Pillars_All' to data source `../out/Tracts_Pillars_All.geojson' using driver `GeoJSON'
## Writing 45 features with 34 fields and geometry type Multi Polygon.
write.csv(st_drop_geometry(tracts),"../out/Tracts_Pillars_All.csv")